Last updated: 2019-11-03
Checks: 3 2
Knit directory: ~/Desktop/Ania_snakemake/raporty_preprocessing/analysis/
This reproducible R Markdown analysis was created with workflowr (version 1.3.0). The Checks tab describes the reproducibility checks that were applied when the results were created. The Past versions tab lists the development history.
Great job! The global environment was empty. Objects defined in the global environment can affect the analysis in your R Markdown file in unknown ways. For reproducibility it’s best to always run the code in an empty environment.
The command set.seed(12345) was run prior to running the code in the R Markdown file. Setting a seed ensures that any results that rely on randomness, e.g. subsampling or permutations, are reproducible.
Great job! Recording the operating system, R version, and package versions is critical for reproducibility.
To ensure reproducibility of the results, delete the cache directory khan_cache and re-run the analysis. To have workflowr automatically delete the cache directory prior to building the file, set delete_cache = TRUE when running wflow_build() or wflow_publish().
Tracking code development and connecting the code version to the results is critical for reproducibility. To start using Git, open the Terminal and type git init in your project directory.
This project is not being versioned with Git. To obtain the full reproducibility benefits of using workflowr, please see ?wflow_start.
# Attach all required packages quietly.
# FIX: stringr was listed twice in the original; the duplicate call was removed.
suppressPackageStartupMessages({
  library(plotly)
  library(readr)
  library(stringr)
  library(edgeR)
  library(pheatmap)
  library(purrr)
  library(scater)
  library(dplyr)
  library(reshape2)
  library(ggplot2)
  library(cowplot)
  library(Matrix)
  library(scran)
  library(Seurat)
  library(sctransform)
  library(readxl)
  library(DropletUtils)
  # library(LSD)  # not attached; used below only via LSD::heatscatter()
  library(CellMixS)
  library(tibble)
  library(ExperimentHub)
})
# Parameters
# Output directory under the project root (via here::here).
# NOTE(review): `out_path` is defined but the saveRDS calls at the end of this
# script use hard-coded absolute paths instead — confirm which is intended.
out_path <- here::here("output")
# Seed passed to Seurat's RunTSNE/RunUMAP/FindClusters below.
seed <- 1234
# Connect to Bioconductor's ExperimentHub to fetch the dataset.
sc<-ExperimentHub()
Warning: `overscope_eval_next()` is deprecated as of rlang 0.2.0.
Please use `eval_tidy()` with a data mask instead.
This warning is displayed once per session.
Warning: `overscope_clean()` is deprecated as of rlang 0.2.0.
This warning is displayed once per session.
# Fetch record EH2259 as a SingleCellExperiment (the PBMC dataset saved below
# as "sce_khang.rds" — presumably Kang et al.; confirm against the Hub record).
sce<-sc[["EH2259"]]
Warning: package 'muscData' is not available (for R version 3.6.0)
## Keep only genes detected (count > 0) in at least one cell.
expressed <- rowSums(counts(sce) > 0) > 0
sce <- sce[which(expressed), ]
dim(sce)
[1] 18890 29065
# Use the individual (donor) ID as both the sample and dataset label.
sce$Sample <- sce$ind
sce$dataset <- sce$ind
# Mitochondrial genes.
# BUG FIX: the original pattern "^MT" (case-insensitive) matched every symbol
# merely starting with "MT" (MTOR, MTHFR, MTMR...), none of which are
# mitochondrial. Human mitochondrial genes are named "MT-..." (MT-CO1, MT-ND1),
# so the dash must be part of the anchor. This changes pct_counts_Mt downstream
# to reflect true mitochondrial content.
is.mito <- grepl("^MT-", rowData(sce)$SYMBOL, ignore.case = TRUE)
summary(is.mito)
Mode FALSE TRUE
logical 18812 78
# Inspect the symbols flagged as mitochondrial.
# NOTE(review): the listing below contains nuclear genes (MTOR, MTHFR, ...) —
# the "^MT" pattern used above looks too broad; verify against "^MT-".
rownames(sce)[is.mito]
[1] "MTOR" "MTHFR"
[3] "MTFR1L" "MTF1"
[5] "MTF2" "MTMR11_ENSG00000014914"
[7] "MTX1" "MTR"
[9] "MTA3" "MTIF2"
[11] "MTHFD2" "MTX2"
[13] "MTERFD2" "MTMR14"
[15] "MTRNR2L12" "MTHFD2L"
[17] "MTTP" "MTRR"
[19] "MTMR12" "MTX3"
[21] "MTRNR2L2" "MTCH1"
[23] "MTRNR2L9" "MTO1"
[25] "MTFR2" "MTHFD1L"
[27] "MTRF1L" "MTURN"
[29] "MTERF" "MTPN"
[31] "MTMR8" "MTM1_ENSG00000171100"
[33] "MTMR1_ENSG00000063601" "MTCP1_ENSG00000214827"
[35] "MTMR9" "MTMR7"
[37] "MTUS1" "MTFR1"
[39] "MTERFD1" "MTDH"
[41] "MTBP" "MTSS1"
[43] "MTAP" "MTPAP"
[45] "MTRNR2L5" "MTG1"
[47] "MTRNR2L8" "MTCH2"
[49] "MTA2" "MTL5"
[51] "MTMR2" "MTERFD3"
[53] "MTMR6" "MTIF3"
[55] "MTRF1" "MTHFD1"
[57] "MTA1_ENSG00000182979" "MTMR10"
[59] "MTFMT" "MTHFS"
[61] "MTRNR2L4" "MT3"
[63] "MT2A" "MT1E"
[65] "MT1M" "MT1A"
[67] "MT1F" "MT1G"
[69] "MT1H" "MT1X"
[71] "MTSS1L" "MTHFSD"
[73] "MTRNR2L1" "MTMR4"
[75] "MTRNR2L3" "MTG2"
[77] "MTMR3" "MTFP1"
# Per-cell QC metrics (scater), with the mito set as a feature control;
# percent_top records the % of counts in each cell's top 20/50/100/200 genes.
sce <- calculateQCMetrics(sce, feature_controls=list(Mt = is.mito), percent_top=c(20,50,100,200))
# Show the 20 genes with the highest mean expression across cells.
plotHighestExprs(sce,n=20)
# Find outlier
# Plot filters
# Histogram of one per-cell QC variable with MAD-based cutoff lines.
#
# sce:      SingleCellExperiment whose colData holds the QC columns.
# var:      name of a numeric colData column to plot.
# split_by: colData column used to facet (NULL for a single panel).
# nrow:     number of facet rows (default: ceiling(#datasets / 3)).
# nmads:    MAD multipliers; one pair of vertical cutoff lines per value.
# lt:       line types, matched positionally to nmads.
# xscale:   "free", "fixed", an integer > 1 (clip x to that many MADs around
#           the median), or a fraction in (0,1] (clip to that central quantile
#           range).
# Returns a ggplot object.
plotFilters <- function( sce, var="log10_total_counts", split_by="Sample", nrow=NULL,
                         nmads=c(2,3,5), lt=c("dashed","dotted","dotdash"), xscale="free" ){
  CD <- as.data.frame(colData(sce))
  if(!(var %in% colnames(CD))) stop(paste("`var`",var,"is not in `colData(sce)`!"))
  if(!is.null(split_by) && !(split_by %in% colnames(CD))){
    stop(paste("`split_by`",split_by,"is not in `colData(sce)`!"))
  }
  # ggplot2/cowplot are attached at the top of the file; no library() calls
  # inside the function (avoids hidden side effects).
  d <- CD[,var,drop=FALSE]
  if(!is.null(split_by)) d$dataset <- CD[[split_by]]
  p <- ggplot(d, aes_string(x=var)) + geom_histogram(color="darkblue", bins=30)
  if(xscale!="free"){
    if(xscale!="fixed"){
      if(xscale>1 && xscale%%1==0){
        # Integer xscale: clip the axis to +/- xscale MADs around the median,
        # but never beyond the observed data range.
        xq <- .tmads(d[[var]], xscale)
        xr <- range(d[[var]], na.rm=TRUE)
        xq <- c(max(xq[1],xr[1]), min(xq[2],xr[2]))
      }else if(xscale<=1 && xscale>0){
        # Fractional xscale: clip to the central `xscale` quantile range.
        xscale <- (1-xscale)/2
        xq <- quantile(d[[var]], probs=c(xscale,1-xscale), na.rm=TRUE)
      }else{
        stop("Wrong `xscale` value!")
      }
      p <- p + xlim(xq[1], xq[2])
    }
  }
  if(!is.null(split_by)){
    if(is.null(nrow)) nrow <- ceiling(length(unique(d$dataset))/3)
    p <- p + facet_wrap(~dataset, scales=ifelse(xscale=="free","free","free_y"), nrow=nrow)
    # Per-facet cutoffs: each dataset gets its own MAD-based vertical lines.
    for(ds in unique(d$dataset)){
      for(i in seq_along(nmads)){
        ma <- .tmads(d[which(d$dataset==ds),var], nmads[i])
        df2 <- data.frame(xint=as.numeric(ma), dataset=rep(ds,2))
        p <- p + geom_vline(data=df2, aes(xintercept=xint), linetype=lt[i])
      }
    }
  }else{
    for(i in seq_along(nmads)){
      df2 <- data.frame(xint=as.numeric(.tmads(d[[var]], nmads[i])))
      p <- p + geom_vline(data=df2, aes(xintercept=xint), linetype=lt[i])
    }
  }
  p
}
# Return median(x) +/- nbmads * median(|x - median(x)|) — an unscaled
# MAD-style interval used as lower/upper QC cutoffs.
.tmads <- function(x, nbmads=2.5){
  ctr <- median(x)
  dev <- nbmads * median(abs(x - ctr))
  ctr + c(-dev, dev)
}
# Library-size distribution per sample with 2/3/5-MAD cutoff lines.
plotFilters(sce)
# Detected-features distribution per sample.
plotFilters(sce, "log10_total_features_by_counts")
# Mitochondrial percentage; clip the x axis to the central 98% of values.
plotFilters(sce, "pct_counts_Mt", xscale=0.98)
Warning: Removed 291 rows containing non-finite values (stat_bin).
Warning: Removed 16 rows containing missing values (geom_bar).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 2 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
Warning: Removed 1 rows containing missing values (geom_vline).
# Filtering: flag cells more than 2.5 MADs from the per-sample (batch) median;
# counts/features are tested on the log scale and in both directions, while
# mito outliers must additionally exceed 0.5%.
sce$total_counts_drop <- isOutlier(
  sce$total_counts, nmads = 2.5, type = "both", log = TRUE, batch = sce$dataset
)
sce$total_features_drop <- isOutlier(
  sce$total_features_by_counts, nmads = 2.5, type = "both", log = TRUE, batch = sce$dataset
)
sce$mito_drop <- sce$pct_counts_Mt > 0.5 &
  isOutlier(sce$pct_counts_Mt, nmads = 2.5, type = "higher", batch = sce$dataset)
# A cell is discarded if it fails any of the three criteria.
sce$isOutlier <- sce$total_counts_drop | sce$total_features_drop | sce$mito_drop
# quality plot
# Histogram of one QC feature; cells flagged in the matching "<feature>_drop"
# column (or an explicit `aph` column) are drawn semi-transparent.
outlierPlot <- function(cd, feature, aph=NULL, logScale=FALSE, show.legend=TRUE){
  # Default the alpha column to "<feature>_drop"; disable alpha if absent.
  if(is.null(aph)) aph <- paste0(feature, "_drop")
  if(!(aph %in% colnames(cd))) aph <- NULL
  plt <- ggplot(as.data.frame(cd), aes_string(x = feature, alpha = aph)) +
    geom_histogram(show.legend = show.legend)
  if(!is.null(aph)){
    plt <- plt + scale_alpha_manual(values = c("TRUE" = 0.4, "FALSE" = 1))
  }
  if(logScale){
    plt <- plt + scale_x_log10()
  }
  plt
}
# One row of QC histograms (total counts, detected features, mito %) per
# dataset, arranged in a grid with the dataset name as the row label.
plQCplot <- function(cd, show.legend=TRUE){
  per_ds <- lapply(split(cd, cd$dataset), function(sub){
    list(
      outlierPlot(sub, "total_counts", logScale=TRUE, show.legend=show.legend),
      outlierPlot(sub, "total_features_by_counts", "total_features_drop",
                  logScale=TRUE, show.legend=show.legend),
      outlierPlot(sub, "pct_counts_Mt", "mito_drop", show.legend=show.legend)
    )
  })
  n_per_row <- length(per_ds[[1]])
  plot_grid(
    plotlist = do.call(c, per_ds),
    labels = rep(basename(names(per_ds)), each = n_per_row),
    ncol = n_per_row,
    label_x = 0.5
  )
}
# QC grid for all samples, before filtering.
plQCplot(colData(sce), show.legend=FALSE)
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Counts vs. detected features per cell (sqrt axes), coloured by mito %,
# faceted by sample, with a white density overlay.
ggplot(as.data.frame(colData(sce)),
       aes(x=total_features_by_counts, y=total_counts, colour=pct_counts_Mt)) +
  geom_point() +
  facet_wrap(~Sample) +
  geom_density_2d(col="white") +
  scale_x_sqrt() +
  scale_y_sqrt()
# Mito % vs. detected features per cell, faceted by sample.
ggplot(as.data.frame(colData(sce)),
       aes(x=total_features_by_counts, y=pct_counts_Mt)) +
  geom_point() +
  facet_wrap(~Sample) +
  geom_density_2d(col="white")
# Check outlier overlap: pairwise table counting cells flagged by BOTH
# criteria (the diagonal gives each criterion's own total).
mets <- c("total_counts_drop","total_features_drop","mito_drop")
# vapply (type-stable) instead of nested sapply; the inner anonymous function
# in the original shadowed `x`, which made it hard to read.
vapply(mets, function(a){
  vapply(mets, function(b) sum(sce[[a]] & sce[[b]]), integer(1))
}, integer(length(mets)))
total_counts_drop total_features_drop mito_drop
total_counts_drop 757 523 4
total_features_drop 523 1070 6
mito_drop 4 6 691
# Per-sample cell counts before vs. after outlier removal.
kept <- table(sce$Sample[!sce$isOutlier])
nbcells <- cbind(table(sce$Sample), kept)
colnames(nbcells) <- c("cells total","cells after filtering")
nbcells
cells total cells after filtering
101 2390 2314
107 1286 1257
1015 5841 5573
1016 4178 3827
1039 1349 1144
1244 4005 3732
1256 4726 4427
1488 5290 4803
# Side-by-side density scatters of total counts vs. detected features
# (log-log), before and after cell filtering.
layout(matrix(1:2,nrow=1))
LSD::heatscatter( sce$total_counts, sce$total_features_by_counts, xlab="Total counts", ylab="Non-zero features", main="",log="xy")
w <- which(!sce$isOutlier)
LSD::heatscatter( sce$total_counts[w], sce$total_features_by_counts[w], xlab="Total counts", ylab="Non-zero features", main="Filtered cells",log="xy")
# Summary of cells kept vs. filtered out, per sample.
# FIX: label rows by the logical level they represent (FALSE = kept) rather
# than by position — the original positional rename would mislabel or error
# if one of the two levels were absent in a future run.
cct <- table(factor(sce$isOutlier, levels = c(FALSE, TRUE),
                    labels = c("Kept", "Filtered out")),
             sce$Sample)
cct
101 107 1015 1016 1039 1244 1256 1488
Kept 2314 1257 5573 3827 1144 3732 4427 4803
Filtered out 76 29 268 351 205 273 299 487
# Drop the flagged outlier cells.
sce <- sce[, !sce$isOutlier]
# Keep genes with a count above 1 in at least 20 of the remaining cells.
well_detected <- rowSums(counts(sce) > 1) >= 20
sce <- sce[which(well_detected), ]
dim(sce)
[1] 5722 27077
# Sample filtering
# NOTE(review): samples 101 and 107 are dropped here; the reason (e.g. low
# cell numbers — they are the two smallest samples in the table above) is not
# stated — confirm with the analysis notes.
sce<-sce[,!sce$Sample %in% c("101","107")]
# QC grid for the remaining samples.
plQCplot(colData(sce), show.legend=FALSE)
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Dimensions after cell, gene and sample filtering.
dim(sce)
[1] 5722 23506
# Cells per remaining dataset.
table(sce$dataset)
1015 1016 1039 1244 1256 1488
5573 3827 1144 3732 4427 4803
# Scater
set.seed(1000)
# Pre-cluster cells so that size-factor pooling (computeSumFactors below)
# happens within roughly homogeneous groups.
clusters <- quickCluster(sce, use.ranks=FALSE)
table(clusters)
clusters
1 2 3 4 5 6 7 8 9 10 11 12
2900 1844 351 1720 1862 5199 2413 1365 1364 2758 1550 180
# Deconvolution size factors, pooled within the quickCluster groups.
sce <- computeSumFactors(sce, min.mean=0.1, cluster=clusters) ##cluster information added
# Apply the size factors to produce logcounts.
# NOTE(review): scater::normalize() was deprecated in later scater releases
# (renamed logNormCounts); fine for the scater 1.11 pinned in sessionInfo.
sce <- scater::normalize(sce)
# create SeuratObject
# Build a Seurat object from the raw counts, carrying the colData over as
# meta.data; no additional filtering (min.cells/min.features = 0).
seurat <- CreateSeuratObject(counts = counts(sce), meta.data = as.data.frame(colData(sce)), min.cells = 0, min.features = 0, project = "10X", names.delim = ".")
Warning: Feature names cannot have underscores ('_'), replacing with dashes
('-')
# Normalize, scale, and select variable genes separately for each dataset.
sl <- lapply(unique(as.character(seurat@meta.data$dataset)), FUN=function(ds){
  # Subset to this dataset's cells, then log-normalize.
  obj <- NormalizeData(SubsetData(seurat, cells=which(seurat@meta.data$dataset==ds)))
  # BUG FIX: the original called ScaleData(x) but discarded the return value,
  # so the scaled matrix was thrown away; keep the returned object.
  obj <- ScaleData(obj)
  obj <- FindVariableFeatures(obj, verbose=FALSE)
  # use non-standardized variance: rank genes by raw variance, keep top 500
  v <- obj@assays$RNA@meta.features[["variance"]]
  VariableFeatures(obj) <- row.names(obj@assays$RNA@meta.features)[order(v, decreasing=TRUE)[1:500]]
  obj
})
Centering and scaling data matrix
Centering and scaling data matrix
Centering and scaling data matrix
Centering and scaling data matrix
Centering and scaling data matrix
Centering and scaling data matrix
# find anchors & integrate
# Identify pairwise integration anchors across the per-dataset objects (CCA).
anchors <- FindIntegrationAnchors(sl)
Computing 2000 integration features
Scaling features for provided objects
Finding all pairwise anchors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 11504 anchors
Filtering anchors
Retained 5994 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 12079 anchors
Filtering anchors
Retained 5414 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 13101 anchors
Filtering anchors
Retained 6510 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 4875 anchors
Filtering anchors
Retained 4060 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 4913 anchors
Filtering anchors
Retained 4153 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 4783 anchors
Filtering anchors
Retained 3917 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 12217 anchors
Filtering anchors
Retained 6283 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 13226 anchors
Filtering anchors
Retained 7070 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 13502 anchors
Filtering anchors
Retained 6853 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 5150 anchors
Filtering anchors
Retained 2753 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 10929 anchors
Filtering anchors
Retained 5677 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 11714 anchors
Filtering anchors
Retained 6475 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 12059 anchors
Filtering anchors
Retained 6417 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 4706 anchors
Filtering anchors
Retained 2673 anchors
Extracting within-dataset neighbors
Running CCA
Merging objects
Finding neighborhoods
Finding anchors
Found 12087 anchors
Filtering anchors
Retained 6829 anchors
Extracting within-dataset neighbors
# Merge the per-dataset objects into one batch-corrected "integrated" assay.
seurat <- IntegrateData(anchorset = anchors)
Merging dataset 4 into 2
Extracting anchors for merged samples
Finding integration vectors
Finding integration vector weights
Integrating data
Merging dataset 6 into 5
Extracting anchors for merged samples
Finding integration vectors
Finding integration vector weights
Integrating data
Merging dataset 2 4 into 5 6
Extracting anchors for merged samples
Finding integration vectors
Finding integration vector weights
Integrating data
Merging dataset 1 into 5 6 2 4
Extracting anchors for merged samples
Finding integration vectors
Finding integration vector weights
Integrating data
Merging dataset 3 into 5 6 2 4 1
Extracting anchors for merged samples
Finding integration vectors
Finding integration vector weights
Integrating data
# scale integrated data
DefaultAssay(object=seurat) <- "integrated"
seurat <- ScaleData(seurat, verbose=F)
# Dimensionality reduction on the integrated assay: 30 PCs, with t-SNE and
# UMAP computed on the first 20, both using the script-level `seed`.
seurat <- RunPCA(object = seurat, npcs = 30, verbose = FALSE)
seurat <- RunTSNE(object = seurat, perplexity = 30,reduction = "pca", dims = seq_len(20),
seed.use = seed, do.fast = TRUE, verbose = FALSE)
seurat <- RunUMAP(object = seurat, reduction = "pca", dims = seq_len(20),
seed.use = seed, verbose = FALSE,n.neighbors = 30, min.dist = 0.5)
# Graph-based clustering at several resolutions; each run stores its labels
# in meta.data as integrated_snn_res.<res>.
seurat <- FindNeighbors(object = seurat, reduction = "pca", dims = seq_len(20), verbose = FALSE)
for (res in c(0.1, 0.2, 0.4, 0.8, 1, 1.2, 2))
seurat <- FindClusters(object = seurat, resolution = res, random.seed = seed, verbose = FALSE)
# Make the resolution-0.2 clustering the active identity.
seurat <- SetIdent(seurat, value="integrated_snn_res.0.2")
# Rebuild a SingleCellExperiment from the Seurat object: raw counts and
# log-normalized data from the RNA assay, all metadata, and every reduction.
assay_list <- list(
  counts = seurat@assays$RNA@counts,
  logcounts = seurat@assays$RNA@data
)
embeddings <- lapply(seurat@reductions, function(red) red@cell.embeddings)
sce <- SingleCellExperiment(
  assays = assay_list,
  colData = seurat@meta.data,
  reducedDims = embeddings
)
# assign cluster to sce
# Sanity check: cell order must match before copying cluster labels across.
identical(colnames(sce), colnames(seurat))
[1] TRUE
# NOTE(review): `seurat_clusters` holds the result of the LAST FindClusters
# call (res = 2), not the res-0.2 identities selected via SetIdent above —
# confirm which clustering was intended here.
sce$seurat_cluster <- seurat@meta.data$seurat_clusters
# Save data
# NOTE(review): these absolute paths bypass the unused `out_path` defined at
# the top of the script; consider saveRDS(sce, file.path(out_path,
# "sce_khang.rds")) after confirming the intended destination directory.
saveRDS(sce, file = '~/Desktop/Ania_snakemake/aktualny_projekt_moj_github/characterize_batches/output/sce_khang.rds')
saveRDS(seurat, file = '~/Desktop/Ania_snakemake/aktualny_projekt_moj_github/characterize_batches/output/seurat_khang.rds')
# Record R/package versions for reproducibility.
sessionInfo()
R version 3.6.0 (2019-04-26)
Platform: x86_64-apple-darwin15.6.0 (64-bit)
Running under: macOS Sierra 10.12.6
Matrix products: default
BLAS: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRblas.0.dylib
LAPACK: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRlapack.dylib
locale:
[1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
attached base packages:
[1] stats4 parallel stats graphics grDevices utils datasets
[8] methods base
other attached packages:
[1] ExperimentHub_1.10.0 AnnotationHub_2.16.0
[3] BiocFileCache_1.8.0 dbplyr_1.4.0
[5] tibble_2.1.1 CellMixS_1.0.0
[7] kSamples_1.2-8 SuppDists_1.1-9.4
[9] DropletUtils_1.3.13 readxl_1.3.1
[11] sctransform_0.2.0 Seurat_3.0.0
[13] scran_1.11.23 Matrix_1.2-17
[15] cowplot_0.9.4 reshape2_1.4.3
[17] dplyr_0.8.1 scater_1.11.13
[19] SingleCellExperiment_1.5.2 SummarizedExperiment_1.13.0
[21] DelayedArray_0.9.9 BiocParallel_1.17.18
[23] matrixStats_0.54.0 Biobase_2.43.1
[25] GenomicRanges_1.35.1 GenomeInfoDb_1.19.2
[27] IRanges_2.17.4 S4Vectors_0.21.21
[29] BiocGenerics_0.29.2 purrr_0.3.2
[31] pheatmap_1.0.12 edgeR_3.25.3
[33] limma_3.39.14 stringr_1.4.0
[35] readr_1.3.1 plotly_4.9.0
[37] ggplot2_3.1.1
loaded via a namespace (and not attached):
[1] reticulate_1.12 R.utils_2.8.0
[3] tidyselect_0.2.5 RSQLite_2.1.1
[5] AnnotationDbi_1.46.0 htmlwidgets_1.3
[7] grid_3.6.0 Rtsne_0.15
[9] munsell_0.5.0 codetools_0.2-16
[11] ica_1.0-2 statmod_1.4.30
[13] future_1.12.0 withr_2.1.2
[15] colorspace_1.4-1 knitr_1.23
[17] rstudioapi_0.10 ROCR_1.0-7
[19] gbRd_0.4-11 listenv_0.7.0
[21] Rdpack_0.11-0 git2r_0.25.2
[23] GenomeInfoDbData_1.2.0 bit64_0.9-7
[25] rhdf5_2.27.15 rprojroot_1.3-2
[27] xfun_0.7 R6_2.4.0
[29] ggbeeswarm_0.6.0 rsvd_1.0.0
[31] locfit_1.5-9.1 bitops_1.0-6
[33] assertthat_0.2.1 promises_1.0.1
[35] SDMTools_1.1-221.1 scales_1.0.0
[37] beeswarm_0.2.3 gtable_0.3.0
[39] npsurv_0.4-0 globals_0.12.4
[41] workflowr_1.3.0 rlang_0.4.0
[43] splines_3.6.0 lazyeval_0.2.2
[45] BiocManager_1.30.4 yaml_2.2.0
[47] backports_1.1.4 httpuv_1.5.1
[49] tools_3.6.0 gplots_3.0.1.1
[51] RColorBrewer_1.1-2 dynamicTreeCut_1.63-1
[53] ggridges_0.5.1 Rcpp_1.0.1
[55] plyr_1.8.4 zlibbioc_1.29.0
[57] RCurl_1.95-4.12 pbapply_1.4-0
[59] viridis_0.5.1 zoo_1.8-5
[61] ggrepel_0.8.0 cluster_2.0.8
[63] fs_1.3.0 magrittr_1.5
[65] data.table_1.12.2 lmtest_0.9-37
[67] RANN_2.6.1 fitdistrplus_1.0-14
[69] hms_0.4.2 lsei_1.2-0
[71] mime_0.6 evaluate_0.13
[73] xtable_1.8-4 gridExtra_2.3
[75] compiler_3.6.0 KernSmooth_2.23-15
[77] crayon_1.3.4 R.oo_1.22.0
[79] htmltools_0.3.6 later_0.8.0
[81] tidyr_0.8.3 DBI_1.0.0
[83] MASS_7.3-51.4 rappdirs_0.3.1
[85] R.methodsS3_1.7.1 gdata_2.18.0
[87] metap_1.1 igraph_1.2.4.1
[89] pkgconfig_2.0.2 vipor_0.4.5
[91] dqrng_0.2.0 XVector_0.23.2
[93] bibtex_0.4.2 digest_0.6.19
[95] tsne_0.1-3 rmarkdown_1.12
[97] cellranger_1.1.0 DelayedMatrixStats_1.5.2
[99] listarrays_0.2.0 curl_3.3
[101] shiny_1.3.2 gtools_3.8.1
[103] nlme_3.1-139 jsonlite_1.6
[105] Rhdf5lib_1.5.4 BiocNeighbors_1.1.12
[107] viridisLite_0.3.0 pillar_1.4.0
[109] lattice_0.20-38 httr_1.4.0
[111] survival_2.44-1.1 interactiveDisplayBase_1.22.0
[113] glue_1.3.1 png_0.1-7
[115] bit_1.1-14 stringi_1.4.3
[117] HDF5Array_1.11.11 blob_1.1.1
[119] BiocSingular_0.99.14 caTools_1.17.1.2
[121] memoise_1.1.0 irlba_2.3.3
[123] future.apply_1.2.0 ape_5.3